library(tidyverse) # data manipulation
library(ggpubr) # producing data exploratory plots
library(modelsummary) # descriptive data
library(glmmTMB) # running generalised mixed models
library(DHARMa) # model diagnostics
library(performance) # model diagnostics
library(ggeffects) # partial effect plots
library(car) # running Anova on model
library(emmeans) # post-hoc analysis
m1 <- read_csv("import_data/1_month_size_data_2022_2023.csv") |>
mutate(across(1:15,factor)) |>
mutate(STANDARD_LENGTH =LENGTH,
.keep = "unused") |>
select(!(NOTES)) |>
select(1:15,"STANDARD_LENGTH","MASS")
m2 <- read_csv("import_data/2_month_size_data_2022_2023.csv") |>
mutate(across(1:15,factor)) |>
mutate(STANDARD_LENGTH=LENGTH,
.keep = "unused") |>
select(!(NOTES)) |>
select(1:15,"STANDARD_LENGTH","MASS")
m2.5 <- read_csv("import_data/2-5_month_size_data_2022_2023.csv") |>
mutate(across(1:15,factor)) |>
mutate(STANDARD_LENGTH =LENGTH,
.keep = "unused") |>
select(!(NOTES)) |>
select(1:15,"STANDARD_LENGTH","MASS")
adult <- read_csv("import_data/adult_size_2022_2023.csv") |>
mutate(across(1:3,factor),
MALE = FISH_ID,
FEMALE = FISH_ID,
POPULATION = str_sub(FISH_ID, 2,4),
POPULATION = case_when(POPULATION == "ARL" ~ "Arlington Reef",
POPULATION == "SUD" ~ "Sudbury Reef",
POPULATION == "VLA" ~ "Vlassof cay",
POPULATION == "PRE" ~ "Pretty patches",
TRUE ~ POPULATION)) |>
left_join(select(m1, c("MALE","TEMPERATURE")),
by="MALE") |>
left_join(select(m1, c("FEMALE","TEMPERATURE")),
by="FEMALE") |>
distinct() |>
mutate(TEMPERATURE = coalesce(TEMPERATURE.x, TEMPERATURE.y)) |>
drop_na(TEMPERATURE) |>
select(-c("TEMPERATURE.x","TEMPERATURE.y"))m1_df <- m1 |>
left_join(select(adult, c("MALE", "SL", "MASS")),
by ="MALE") |>
mutate(SL_MALE =SL,
MASS_MALE =MASS.y,
.keep = "unused") |>
left_join(select(adult, c("FEMALE", "SL", "MASS")),
by ="FEMALE") |>
mutate(SL_FEMALE =SL,
MASS_FEMALE =MASS,
.keep ="unused") |>
mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2,
MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)
m2_df <- m2 |>
left_join(select(adult, c("MALE", "SL", "MASS")),
by ="MALE") |>
mutate(SL_MALE =SL,
MASS_MALE =MASS.y,
.keep = "unused") |>
left_join(select(adult, c("FEMALE", "SL", "MASS")),
by ="FEMALE") |>
mutate(SL_FEMALE =SL,
MASS_FEMALE =MASS,
.keep ="unused") |>
mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2,
MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2)
m2.5_df <- m2.5 |>
left_join(select(adult, c("MALE", "SL", "MASS")),
by ="MALE") |>
mutate(SL_MALE =SL,
MASS_MALE =MASS.y,
.keep = "unused") |>
left_join(select(adult, c("FEMALE", "SL", "MASS")),
by ="FEMALE") |>
mutate(SL_FEMALE =SL,
MASS_FEMALE =MASS,
.keep ="unused") |>
mutate(SL_MIDPOINT = (SL_MALE+SL_FEMALE)/2,
MASS_MIDPOINT = (MASS_MALE+MASS_FEMALE)/2) |>
  drop_na(SL_MALE)
plot1 <- ggplot(m1_df, aes(x=MASS_MALE, y=MASS.x, color=TEMPERATURE)) +
geom_point(alpha=0.05) +
stat_smooth(method = "lm") +
ylim(0,0.15) +
theme_classic()
plot2 <- ggplot(m1_df, aes(x=MASS_FEMALE, y=MASS.x, color=TEMPERATURE)) +
geom_point(alpha=0.05) +
stat_smooth(method = "lm") +
ylim(0,0.15) +
theme_classic()
plot3 <- ggplot(m1_df, aes(x=MASS_MIDPOINT, y=MASS.x, color=TEMPERATURE)) +
geom_point(alpha=0.05) +
stat_smooth(method = "lm") +
ylim(0,0.15) +
theme_classic()
ggarrange(plot1, plot2, plot3,
nrow =1,
ncol =3,
  common.legend = TRUE)
plot1 <- ggplot(m1_df, aes(x=SL_MALE, y=STANDARD_LENGTH, color=TEMPERATURE)) +
geom_point(alpha=0.05) +
stat_smooth(method = "lm") +
theme_classic()
plot2 <- ggplot(m1_df, aes(x=SL_FEMALE, y=STANDARD_LENGTH, color=TEMPERATURE)) +
geom_point(alpha=0.05) +
stat_smooth(method = "lm") +
theme_classic()
plot3 <- ggplot(m1_df, aes(x=SL_MIDPOINT, y=STANDARD_LENGTH, color=TEMPERATURE)) +
geom_point(alpha=0.05) +
stat_smooth(method = "lm") +
theme_classic()
ggarrange(plot1, plot2, plot3,
nrow =1,
ncol =3,
  common.legend = TRUE)

| POPULATION | 27 °C | 28.5 °C | 30 °C |
|---|---|---|---|
| Arlington Reef | 8 | 8 | 8 |
| Pretty patches | 4 | 6 | 6 |
| Sudbury Reef | 4 | 4 | 2 |
| Vlassof cay | 6 | 2 | 6 |

| POPULATION | 27 °C | 28.5 °C | 30 °C |
|---|---|---|---|
| Arlington Reef | 231 | 105 | 202 |
| Pretty Patches | 116 | 82 | 142 |
| Sudbury Reef | 117 | 55 | 47 |
| Vlassof Cay | 114 | 60 | 120 |

| POPULATION | 27 °C | 28.5 °C | 30 °C |
|---|---|---|---|
| Arlington Reef | 198 | 108 | 152 |
| Pretty Patches | 113 | 83 | 134 |
| Sudbury Reef | 113 | 60 | 56 |
| Vlassof Cay | 77 | 58 | 113 |

| POPULATION | 27 °C | 28.5 °C | 30 °C |
|---|---|---|---|
| Arlington Reef | 239 | 103 | 211 |
| Pretty Patches | 100 | 83 | 133 |
| Sudbury Reef | 111 | 53 | 50 |
| Vlassof Cay | 102 | 60 | 106 |
datasummary(Factor(TEMPERATURE) ~ MASS * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(adult, MASS),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 21 | 46.93 | 50.63 | 29.85 | 59.93 | 10.30 | ▇▁▃▄▄▆▄ |
| 28.5 | 20 | 38.81 | 42.36 | 16.28 | 53.26 | 10.26 | ▁▁▄▃▄▇▆▁ |
| 30 | 22 | 39.94 | 39.62 | 23.91 | 57.31 | 9.43 | ▅▇▂▇▇▇▂▇▅▂ |
datasummary(Factor(TEMPERATURE) ~ MASS.x * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(m1_df, MASS.x),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 437 | 0.06 | 0.06 | 0.01 | 0.22 | 0.03 | ▃▇▇▄▂▁ |
| 28.5 | 248 | 0.07 | 0.06 | 0.02 | 0.19 | 0.02 | ▁▅▇▄▃ |
| 30 | 389 | 0.07 | 0.07 | 0.01 | 0.15 | 0.02 | ▄▅▇▆▅▃▂▁ |
datasummary(Factor(TEMPERATURE) ~ MASS.x * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(m2_df, MASS.x),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 460 | 0.27 | 0.27 | 0.03 | 0.68 | 0.10 | ▁▂▄▇▆▃▁ |
| 28.5 | 297 | 0.28 | 0.27 | 0.06 | 0.64 | 0.10 | ▁▃▇▇▇▄▁▁▁ |
| 30 | 425 | 0.28 | 0.27 | 0.04 | 0.62 | 0.11 | ▂▂▅▇▇▄▂▁▁ |
datasummary(Factor(TEMPERATURE) ~ MASS.x * (NUnique + mean + median + min + max + sd + Histogram),
data = drop_na(m2.5_df, MASS.x),
fmt = "%.2f")| TEMPERATURE | NUnique | mean | median | min | max | sd | Histogram |
|---|---|---|---|---|---|---|---|
| 27 | 529 | 0.46 | 0.46 | 0.04 | 1.00 | 0.17 | ▁▂▄▇▇▆▄▁▁ |
| 28.5 | 293 | 0.45 | 0.44 | 0.08 | 1.00 | 0.18 | ▂▄▆▇▆▅▃▂▁ |
| 30 | 477 | 0.45 | 0.45 | 0.08 | 0.96 | 0.15 | ▁▂▄▆▇▄▂▁ |
modelNULL <- glmmTMB(MASS.x ~ 1,
family=gaussian(),
data =m1_df)
model1 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER),
family=gaussian(),
data =m1_df)
model2 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|LEVEL),
family=gaussian(),
data = m1_df)
model3 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER),
family=gaussian(),
data = m1_df)
model4 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|REGION),
family=gaussian(),
data = m1_df)
model5 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m1_df)
model6 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m1_df)
AIC(modelNULL, model1, model2, model3, model4, model5, model6)
modelNULL <- glmmTMB(MASS.x ~ 1,
family=gaussian(),
data =m2_df)
model1 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER),
family=gaussian(),
data =m2_df)
model2 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|LEVEL),
family=gaussian(),
data = m2_df)
model3 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER),
family=gaussian(),
data = m2_df)
model4 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|REGION),
family=gaussian(),
data = m2_df)
model5 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2_df)
model6 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2_df)
AIC(modelNULL, model1, model2, model3, model4, model5, model6)
modelNULL <- glmmTMB(MASS.x ~ 1,
family=gaussian(),
data =m2.5_df)
model1 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER),
family=gaussian(),
data =m2.5_df)
model2 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|LEVEL),
family=gaussian(),
data = m2.5_df)
model3 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER),
family=gaussian(),
data = m2.5_df)
model4 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|REGION),
family=gaussian(),
data = m2.5_df)
model5 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2.5_df)
model6 <- glmmTMB(MASS.x ~ (1|CLUTCH_NUMBER) + (1|CLUTCH_ORDER) + (1|REGION) + (1|POPULATION),
family=gaussian(),
data = m2.5_df)
AIC(modelNULL, model1, model2, model3, model4, model5, model6)
For mass measurements at each time point (1, 2, and 2.5 months), the best model is the simplest one (model1), in which CLUTCH_NUMBER is the only random factor.
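The same comparison can be written more compactly with compare_performance() from the performance package (already loaded above); a minimal sketch, shown for whichever model set was fitted last:

compare_performance(modelNULL, model1, model2, model3, model4, model5, model6,
                    metrics = c("AIC", "BIC"), rank = TRUE)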
Now that we have determined which random factors to include in our generalised linear mixed-effects model, we can start exploring different hypotheses by adding our fixed factors (covariates).
The fixed factors included will be those essential to answering the initial research question on the heritability of traits between offspring and parental fish, labelled MALE and FEMALE in the dataframe, along with their combined score MIDPOINT where applicable. TEMPERATURE is also essential, since the main research question asks whether heritability changes at different temperatures.
Our main research hypothesis will be modelled using the formula below:
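In glmmTMB syntax, using the male parent as an example (the female and midpoint models simply swap in MASS_FEMALE or MASS_MIDPOINT), this is the model1a that appears in the summaries below:

model1a <- glmmTMB(MASS.x ~ MASS_MALE * TEMPERATURE + (1|CLUTCH_NUMBER),
                   family = gaussian(),
                   data = m1_df)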
An alternative research hypothesis we will test includes an interaction with PARENTAL_DAYS_IN_TEMPERATURE, to see whether heritability is affected by how long adults spent at experimental temperatures. This model may look something like:
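A sketch of that alternative, here called model1b; the three-way interaction structure is an assumption, not confirmed output:

model1b <- glmmTMB(MASS.x ~ MASS_MALE * TEMPERATURE * PARENTAL_DAYS_IN_TEMPERATURE +
                     (1|CLUTCH_NUMBER),
                   family = gaussian(),
                   data = m1_df)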
Let's start fitting models:
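The comparison chunk is not echoed, but judging from the warnings below it runs something along these lines (the model names and k = 12 come from the warning messages themselves):

AIC(modelNULL, model1a, model1b, k = 12)
BIC(modelNULL, model1a, model1b)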
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
Model1a was selected as the best model and will be used going forward.
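The diagnostics printed below come from DHARMa; a minimal sketch of the likely calls (n = 250 and refit = FALSE match the printed header):

simulationOutput <- simulateResiduals(fittedModel = model1a, n = 250)
plot(simulationOutput) # QQ plot and residuals vs predicted
testResiduals(simulationOutput) # uniformity, dispersion and outlier tests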
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.748 0.912 0.944 0.476 0.628 0.504 0.848 0.7 0.264 0.884 0.248 0.532 0.524 0.704 0.464 0.312 0.7 0.544 0.452 0.84 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.058365, p-value = 0.0001575
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0128, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 18, observations = 1387, p-value = 0.04717
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.007709021 0.020432992
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01297765
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_MALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m1_df
##
## AIC BIC logLik deviance df.resid
## -7014.2 -6972.3 3515.1 -7030.2 1379
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.0003148 0.01774
## Residual 0.0003268 0.01808
## Number of obs: 1387, groups: CLUTCH_NUMBER, 50
##
## Dispersion estimate for gaussian family (sigma^2): 0.000327
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.0302112 0.0191034 1.581 0.1138
## MASS_MALE 0.0006713 0.0004056 1.655 0.0979 .
## TEMPERATURE28.5 0.0336082 0.0328357 1.024 0.3061
## TEMPERATURE30 0.0306570 0.0249225 1.230 0.2187
## MASS_MALE:TEMPERATURE28.5 -0.0005629 0.0007900 -0.713 0.4761
## MASS_MALE:TEMPERATURE30 -0.0004390 0.0005737 -0.765 0.4441
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) -0.0072308181 0.0676532115 0.0302111967
## MASS_MALE -0.0001235985 0.0014661839 0.0006712927
## TEMPERATURE28.5 -0.0307486790 0.0979650734 0.0336081972
## TEMPERATURE30 -0.0181902804 0.0795041954 0.0306569575
## MASS_MALE:TEMPERATURE28.5 -0.0021112670 0.0009854722 -0.0005628974
## MASS_MALE:TEMPERATURE30 -0.0015633965 0.0006852967 -0.0004390499
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.0144796866 0.0217422563 0.0177431975
## # R2 for Mixed Models
##
## Conditional R2: 0.516
## Marginal R2: 0.049
m1.mass <- emmeans(model1a, ~ MASS_MALE*TEMPERATURE,
at =list(MASS_MALE=seq(from =min(m1_df$MASS_MALE), to =max(m1_df$MASS_MALE), by=.25)))
m1.mass.df <- as.data.frame(m1.mass)
m1.mass.obs <- drop_na(m1_df, MASS_MALE, MASS.x) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
m1.mass.obs.summarize <- m1.mass.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.mass =mean(Fit, na.rm=TRUE),
mean.mass.male =mean(MASS_MALE, na.rm = TRUE),
sd.mass =sd(Fit, na.rm =TRUE),
n.mass = n()) |>
mutate(se.mass = sd.mass / sqrt(n.mass),
lower.ci.mass =mean.mass - qt(1-(0.05/2), n.mass -1) * se.mass,
upper.ci.mass =mean.mass + qt(1-(0.05/2), n.mass -1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m1.mass.df, aes(x=MASS_MALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm") +
geom_pointrange(data = m1.mass.obs.summarize, aes(x =mean.mass.male,
y =mean.mass,
ymin =lower.ci.mass,
ymax =upper.ci.mass,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL MALE MASS (g)") +
ylab("OFFSPRING MASS (g)") +
ggtitle("Offspring-male relationship") +
  theme_classic()
## `geom_smooth()` using formula = 'y ~ x'
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
The null model appears better than the models we fitted. Let's explore the data a bit more and see if we can find a reason for this, starting with a basic histogram of our data.
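A quick sketch of that histogram, faceted by temperature (the bin count here is arbitrary):

ggplot(m2_df, aes(x = MASS.x)) +
  geom_histogram(bins = 30) +
  facet_wrap(~TEMPERATURE) +
  theme_classic()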
There appears to be a left skew in our data. Let's see whether this is better modelled with a Gamma distribution; if not, we can try transforming the response variable. The model validations below could also use some improvement.
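A Gamma refit would look something like the sketch below (the model name is hypothetical; a log link is assumed, and a Gamma family requires strictly positive masses):

model1a_gamma <- glmmTMB(MASS.x ~ MASS_MALE * TEMPERATURE + (1|CLUTCH_NUMBER),
                         family = Gamma(link = "log"),
                         data = m2_df)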
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.02 0.132 0.056 0.096 0.392 0.284 0.212 0.552 0.18 0.336 0.372 0.724 0.648 0.532 0.132 0.404 0.6 0.252 0.268 0.484 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.03042, p-value = 0.193
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.99807, p-value = 0.992
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 11, observations = 1263, p-value = 0.7497
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00435550 0.01553001
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.008709422
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_MALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2_df
##
## AIC BIC logLik deviance df.resid
## -2285.4 -2244.2 1150.7 -2301.4 1255
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.001475 0.03841
## Residual 0.008888 0.09428
## Number of obs: 1263, groups: CLUTCH_NUMBER, 47
##
## Dispersion estimate for gaussian family (sigma^2): 0.00889
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.3139182 0.0490782 6.396 1.59e-10 ***
## MASS_MALE -0.0009611 0.0010338 -0.930 0.353
## TEMPERATURE28.5 -0.0360441 0.0793221 -0.454 0.650
## TEMPERATURE30 -0.0130914 0.0638280 -0.205 0.837
## MASS_MALE:TEMPERATURE28.5 0.0010760 0.0018901 0.569 0.569
## MASS_MALE:TEMPERATURE30 0.0003770 0.0014481 0.260 0.795
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.217726762 0.410109676 0.3139182191
## MASS_MALE -0.002987347 0.001065120 -0.0009611137
## TEMPERATURE28.5 -0.191512649 0.119424359 -0.0360441451
## TEMPERATURE30 -0.138192097 0.112009226 -0.0130914353
## MASS_MALE:TEMPERATURE28.5 -0.002628522 0.004780547 0.0010760123
## MASS_MALE:TEMPERATURE30 -0.002461237 0.003215314 0.0003770389
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.029899653 0.049340932 0.0384093314
## # R2 for Mixed Models
##
## Conditional R2: 0.149
## Marginal R2: 0.007
m2.mass <- emmeans(model1a, ~ MASS_MALE*TEMPERATURE,
at =list(MASS_MALE=seq(from =min(m2_df$MASS_MALE), to =max(m2_df$MASS_MALE), by=.25)))
m2.mass.df <- as.data.frame(m2.mass)
m2.mass.obs <- drop_na(m2_df, MASS_MALE, MASS.x) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
m2.mass.obs.summarize <- m2.mass.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.mass =mean(Fit, na.rm=TRUE),
mean.mass.male =mean(MASS_MALE, na.rm = TRUE),
sd.mass =sd(Fit, na.rm =TRUE),
n.mass = n()) |>
mutate(se.mass = sd.mass / sqrt(n.mass),
lower.ci.mass =mean.mass - qt(1-(0.05/2), n.mass -1) * se.mass,
upper.ci.mass =mean.mass + qt(1-(0.05/2), n.mass -1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m2.mass.df, aes(x=MASS_MALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm",
formula = y ~ x) +
geom_pointrange(data = m2.mass.obs.summarize, aes(x =mean.mass.male,
y =mean.mass,
ymin =lower.ci.mass,
ymax =upper.ci.mass,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL MALE STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
  theme_classic()
Once again the null model seems to outperform our hypothesis-testing models. Let's follow the steps we used for the 2-month data and apply a log transformation to our dataset to see if it improves the model.
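A log-transformed refit might look like this sketch (the model name is hypothetical):

model1a_log <- glmmTMB(log(MASS.x) ~ MASS_MALE * TEMPERATURE + (1|CLUTCH_NUMBER),
                       family = gaussian(),
                       data = m2.5_df)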
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.936 0.876 0.696 0.428 0.628 0.792 0.84 0.248 0.904 0.272 0.976 0.12 0.684 0.812 0.916 0.66 0.868 1 0.836 0.168 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.030135, p-value = 0.1731
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.96289, p-value = 0.52
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 8, observations = 1347, p-value = 0.5374
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.002567476 0.011668738
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.005939124
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_MALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2.5_df
##
## AIC BIC logLik deviance df.resid
## -1173.9 -1132.3 595.0 -1189.9 1339
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.005084 0.0713
## Residual 0.022483 0.1499
## Number of obs: 1347, groups: CLUTCH_NUMBER, 52
##
## Dispersion estimate for gaussian family (sigma^2): 0.0225
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.3925825 0.0816490 4.808 1.52e-06 ***
## MASS_MALE 0.0016472 0.0017364 0.949 0.343
## TEMPERATURE28.5 0.0486000 0.1398557 0.348 0.728
## TEMPERATURE30 0.0745723 0.1048126 0.711 0.477
## MASS_MALE:TEMPERATURE28.5 -0.0007853 0.0033636 -0.233 0.815
## MASS_MALE:TEMPERATURE30 -0.0022103 0.0024207 -0.913 0.361
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.232553327 0.552611697 0.3925825116
## MASS_MALE -0.001756050 0.005050412 0.0016471811
## TEMPERATURE28.5 -0.225512062 0.322712097 0.0486000174
## TEMPERATURE30 -0.130856529 0.280001173 0.0745723223
## MASS_MALE:TEMPERATURE28.5 -0.007377780 0.005807182 -0.0007852992
## MASS_MALE:TEMPERATURE30 -0.006954825 0.002534240 -0.0022102926
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.055841927 0.091045568 0.0713032954
## # R2 for Mixed Models
##
## Conditional R2: 0.192
## Marginal R2: 0.010
m2.5.mass <- emmeans(model1a, ~ MASS_MALE*TEMPERATURE,
  at =list(MASS_MALE=seq(from =min(m2.5_df$MASS_MALE), to =max(m2.5_df$MASS_MALE), by=.25)))
m2.5.mass.df <- as.data.frame(m2.5.mass)
m2.5.mass.obs <- drop_na(m2.5_df, MASS_MALE, MASS.x) |>
  mutate(Pred =predict(model1a, re.form=NA, type ='response'),
         Resid =residuals(model1a, type ='response'),
         Fit =Pred+Resid)
m2.5.mass.obs.summarize <- m2.5.mass.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.mass =mean(Fit, na.rm=TRUE),
mean.mass.male =mean(MASS_MALE, na.rm = TRUE),
sd.mass =sd(Fit, na.rm =TRUE),
n.mass = n()) |>
mutate(se.mass = sd.mass / sqrt(n.mass),
lower.ci.mass =mean.mass - qt(1-(0.05/2), n.mass -1) * se.mass,
upper.ci.mass =mean.mass + qt(1-(0.05/2), n.mass -1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m2.5.mass.df, aes(x=MASS_MALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm",
formula = y ~ x) +
  geom_pointrange(data = m2.5.mass.obs.summarize, aes(x =mean.mass.male,
y =mean.mass,
ymin =lower.ci.mass,
ymax =upper.ci.mass,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL MALE STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
  theme_classic()
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
Model1a was selected as the best model and will be used going forward.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.812 0.904 0.96 0.528 0.58 0.504 0.892 0.604 0.288 0.872 0.26 0.524 0.556 0.692 0.472 0.276 0.7 0.58 0.46 0.888 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.047398, p-value = 0.005078
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0082, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 17, observations = 1330, p-value = 0.06126
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00746311 0.02038649
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01278195
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_FEMALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m1_df
##
## AIC BIC logLik deviance df.resid
## -6720.7 -6679.2 3368.4 -6736.7 1322
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.0002590 0.01609
## Residual 0.0003303 0.01817
## Number of obs: 1330, groups: CLUTCH_NUMBER, 48
##
## Dispersion estimate for gaussian family (sigma^2): 0.00033
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.0385223 0.0155090 2.484 0.013 *
## MASS_FEMALE 0.0004705 0.0003347 1.406 0.160
## TEMPERATURE28.5 0.0130221 0.0241044 0.540 0.589
## TEMPERATURE30 0.0159863 0.0257814 0.620 0.535
## MASS_FEMALE:TEMPERATURE28.5 -0.0000554 0.0005596 -0.099 0.921
## MASS_FEMALE:TEMPERATURE30 -0.0001052 0.0005902 -0.178 0.859
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.0081251480 0.068919445 0.0385222965
## MASS_FEMALE -0.0001856118 0.001126553 0.0004704707
## TEMPERATURE28.5 -0.0342216257 0.060265844 0.0130221090
## TEMPERATURE30 -0.0345443058 0.066516884 0.0159862892
## MASS_FEMALE:TEMPERATURE28.5 -0.0011521572 0.001041365 -0.0000553959
## MASS_FEMALE:TEMPERATURE30 -0.0012620507 0.001051609 -0.0001052208
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.0130535755 0.019838534 0.0160923523
## # R2 for Mixed Models
##
## Conditional R2: 0.474
## Marginal R2: 0.062
m1_df <-
m1_df |>
drop_na(MASS_FEMALE)
m1.mass <- emmeans(model1a, ~ MASS_FEMALE*TEMPERATURE,
at =list(MASS_FEMALE=seq(from =min(m1_df$MASS_FEMALE), to =max(m1_df$MASS_FEMALE), by=.25)))
m1.mass.df <- as.data.frame(m1.mass)
m1.mass.obs <- drop_na(m1_df, MASS_FEMALE, MASS.x) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
m1.mass.obs.summarize <- m1.mass.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.mass =mean(Fit, na.rm=TRUE),
mean.mass.female =mean(MASS_FEMALE, na.rm = TRUE),
sd.mass =sd(Fit, na.rm =TRUE),
n.mass = n()) |>
mutate(se.mass = sd.mass / sqrt(n.mass),
lower.ci.mass =mean.mass - qt(1-(0.05/2), n.mass -1) * se.mass,
upper.ci.mass =mean.mass + qt(1-(0.05/2), n.mass -1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m1.mass.df, aes(x=MASS_FEMALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm") +
geom_pointrange(data = m1.mass.obs.summarize, aes(x =mean.mass.female,
y =mean.mass,
ymin =lower.ci.mass,
ymax =upper.ci.mass,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL FEMALE STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
  theme_classic()
## `geom_smooth()` using formula = 'y ~ x'
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.012 0.096 0.032 0.052 0.344 0.232 0.28 0.436 0.136 0.308 0.376 0.644 0.68 0.484 0.132 0.392 0.608 0.24 0.26 0.48 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.031149, p-value = 0.1902
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.99815, p-value = 0.984
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 12, observations = 1212, p-value = 0.4166
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.005126155 0.017231108
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.00990099
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_FEMALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2_df
##
## AIC BIC logLik deviance df.resid
## -2206.1 -2165.3 1111.0 -2222.1 1204
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.001354 0.03680
## Residual 0.008810 0.09386
## Number of obs: 1212, groups: CLUTCH_NUMBER, 45
##
## Dispersion estimate for gaussian family (sigma^2): 0.00881
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.2951918 0.0416393 7.089 1.35e-12 ***
## MASS_FEMALE -0.0006774 0.0008855 -0.765 0.444
## TEMPERATURE28.5 -0.0388864 0.0618526 -0.629 0.530
## TEMPERATURE30 0.0756899 0.0703173 1.076 0.282
## MASS_FEMALE:TEMPERATURE28.5 0.0013333 0.0014213 0.938 0.348
## MASS_FEMALE:TEMPERATURE30 -0.0015112 0.0015856 -0.953 0.341
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.213580327 0.376803246 0.2951917864
## MASS_FEMALE -0.002412839 0.001058091 -0.0006773738
## TEMPERATURE28.5 -0.160115359 0.082342498 -0.0388864307
## TEMPERATURE30 -0.062129510 0.213509315 0.0756899028
## MASS_FEMALE:TEMPERATURE28.5 -0.001452316 0.004118892 0.0013332883
## MASS_FEMALE:TEMPERATURE30 -0.004618824 0.001596426 -0.0015111992
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.028369174 0.047737698 0.0368005311
## # R2 for Mixed Models
##
## Conditional R2: 0.149
## Marginal R2: 0.019
m2_df <-
m2_df |>
drop_na(MASS_FEMALE)
m2.mass <- emmeans(model1a, ~ MASS_FEMALE*TEMPERATURE,
at =list(MASS_FEMALE=seq(from =min(m2_df$MASS_FEMALE), to =max(m2_df$MASS_FEMALE), by=.25)))
m2.mass.df <- as.data.frame(m2.mass)
m2.mass.obs <- drop_na(m2_df, MASS_FEMALE, MASS.x) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
m2.mass.obs.summarize <- m2.mass.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.mass =mean(Fit, na.rm=TRUE),
  mean.mass.female =mean(MASS_FEMALE, na.rm = TRUE),
sd.mass =sd(Fit, na.rm =TRUE),
n.mass = n()) |>
mutate(se.mass = sd.mass / sqrt(n.mass),
lower.ci.mass =mean.mass - qt(1-(0.05/2), n.mass -1) * se.mass,
upper.ci.mass =mean.mass + qt(1-(0.05/2), n.mass -1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m2.mass.df, aes(x=MASS_FEMALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm",
formula = y ~ x) +
  geom_pointrange(data = m2.mass.obs.summarize, aes(x =mean.mass.female,
y =mean.mass,
ymin =lower.ci.mass,
ymax =upper.ci.mass,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL FEMALE STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
  theme_classic()
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.948 0.896 0.744 0.5 0.628 0.824 0.828 0.288 0.928 0.34 0.976 0.184 0.66 0.812 0.896 0.716 0.916 1 0.848 0.18 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.033287, p-value = 0.1139
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.95633, p-value = 0.48
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 8, observations = 1293, p-value = 0.6366
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.002674849 0.012154598
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.006187162
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_FEMALE * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2.5_df
##
## AIC BIC logLik deviance df.resid
## -1147.2 -1105.8 581.6 -1163.2 1285
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.005399 0.07348
## Residual 0.022063 0.14854
## Number of obs: 1293, groups: CLUTCH_NUMBER, 50
##
## Dispersion estimate for gaussian family (sigma^2): 0.0221
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.424324 0.074321 5.709 1.13e-08 ***
## MASS_FEMALE 0.001028 0.001606 0.640 0.522
## TEMPERATURE28.5 0.052489 0.115047 0.456 0.648
## TEMPERATURE30 0.068247 0.118685 0.575 0.565
## MASS_FEMALE:TEMPERATURE28.5 -0.001076 0.002676 -0.402 0.688
## MASS_FEMALE:TEMPERATURE30 -0.002163 0.002734 -0.791 0.429
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.278657875 0.569990737 0.424324306
## MASS_FEMALE -0.002119686 0.004176442 0.001028378
## TEMPERATURE28.5 -0.172999022 0.277976300 0.052488639
## TEMPERATURE30 -0.164371146 0.300865760 0.068247307
## MASS_FEMALE:TEMPERATURE28.5 -0.006321504 0.004169887 -0.001075809
## MASS_FEMALE:TEMPERATURE30 -0.007522669 0.003195872 -0.002163398
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.057452294 0.093971308 0.073476985
## # R2 for Mixed Models
##
## Conditional R2: 0.204
## Marginal R2: 0.009
m2.5_df <- m2.5_df |>
drop_na(MASS_FEMALE)
m2.5.mass <- emmeans(model1a, ~ MASS_FEMALE*TEMPERATURE,
at =list(MASS_FEMALE=seq(from =min(m2.5_df$MASS_FEMALE), to =max(m2.5_df$MASS_FEMALE), by=.25)))
m2.5.mass.df <- as.data.frame(m2.5.mass)
m2.5.mass.obs <- drop_na(m2.5_df, MASS_FEMALE, MASS.x) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
m2.5.mass.obs.summarize <- m2.5.mass.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.mass =mean(Fit, na.rm=TRUE),
  mean.mass.female =mean(MASS_FEMALE, na.rm = TRUE),
sd.mass =sd(Fit, na.rm =TRUE),
n.mass = n()) |>
mutate(se.mass = sd.mass / sqrt(n.mass),
lower.ci.mass =mean.mass - qt(1-(0.05/2), n.mass -1) * se.mass,
upper.ci.mass =mean.mass + qt(1-(0.05/2), n.mass -1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m2.5.mass.df, aes(x=MASS_FEMALE, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm",
formula = y ~ x) +
  geom_pointrange(data = m2.5.mass.obs.summarize, aes(x =mean.mass.female,
y =mean.mass,
ymin =lower.ci.mass,
ymax =upper.ci.mass,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL MALE STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
  theme_classic()
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
Model1a was selected as the best model and will be used going forward.
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.816 0.908 0.96 0.536 0.584 0.504 0.892 0.604 0.288 0.872 0.268 0.524 0.56 0.696 0.476 0.28 0.708 0.588 0.472 0.892 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.045774, p-value = 0.007594
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 1.0081, p-value = 0.872
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 17, observations = 1330, p-value = 0.06126
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.00746311 0.02038649
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.01278195
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m1_df
##
## AIC BIC logLik deviance df.resid
## -6721.0 -6679.4 3368.5 -6737.0 1322
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.0002575 0.01605
## Residual 0.0003303 0.01817
## Number of obs: 1330, groups: CLUTCH_NUMBER, 48
##
## Dispersion estimate for gaussian family (sigma^2): 0.00033
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.0329822 0.0166291 1.983 0.0473 *
## MASS_MIDPOINT 0.0005856 0.0003555 1.647 0.0995 .
## TEMPERATURE28.5 0.0213380 0.0274504 0.777 0.4370
## TEMPERATURE30 0.0235153 0.0247728 0.949 0.3425
## MASS_MIDPOINT:TEMPERATURE28.5 -0.0002358 0.0006499 -0.363 0.7167
## MASS_MIDPOINT:TEMPERATURE30 -0.0002539 0.0005741 -0.442 0.6583
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.0003898950 0.0655745936 0.0329822443
## MASS_MIDPOINT -0.0001111277 0.0012822649 0.0005855686
## TEMPERATURE28.5 -0.0324638480 0.0751397589 0.0213379555
## TEMPERATURE30 -0.0250384419 0.0720690390 0.0235152985
## MASS_MIDPOINT:TEMPERATURE28.5 -0.0015095584 0.0010378900 -0.0002358342
## MASS_MIDPOINT:TEMPERATURE30 -0.0013790389 0.0008713143 -0.0002538623
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.0130168709 0.0197849204 0.0160479829
## # R2 for Mixed Models
##
## Conditional R2: 0.473
## Marginal R2: 0.063
m1_df <-
m1_df |>
drop_na(MASS_MIDPOINT)
m1.mass <- emmeans(model1a, ~ MASS_MIDPOINT*TEMPERATURE,
at =list(MASS_MIDPOINT=seq(from =min(m1_df$MASS_MIDPOINT), to =max(m1_df$MASS_MIDPOINT), by=.25)))
m1.mass.df <- as.data.frame(m1.mass)
m1.mass.obs <- drop_na(m1_df, MASS_MIDPOINT, MASS.x) |>
mutate(Pred =predict(model1a, re.form=NA, type ='response'),
Resid =residuals(model1a, type ='response'),
Fit =Pred+Resid)
m1.mass.obs.summarize <- m1.mass.obs |>
group_by(CLUTCH_NUMBER, TEMPERATURE) |>
summarise(mean.mass =mean(Fit, na.rm=TRUE),
  mean.mass.midpoint =mean(MASS_MIDPOINT, na.rm = TRUE),
sd.mass =sd(Fit, na.rm =TRUE),
n.mass = n()) |>
mutate(se.mass = sd.mass / sqrt(n.mass),
lower.ci.mass =mean.mass - qt(1-(0.05/2), n.mass -1) * se.mass,
upper.ci.mass =mean.mass + qt(1-(0.05/2), n.mass -1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m1.mass.df, aes(x=MASS_MIDPOINT, y=emmean)) +
stat_smooth(aes(color=TEMPERATURE),
method = "lm") +
  geom_pointrange(data = m1.mass.obs.summarize, aes(x =mean.mass.midpoint,
y =mean.mass,
ymin =lower.ci.mass,
ymax =upper.ci.mass,
color = TEMPERATURE)) +
scale_color_manual(values = c("#69d7d8","#ff9c56", "#903146")) +
scale_fill_manual(values =c("#69d7d8","#ff9c56", "#903146")) +
facet_wrap(~TEMPERATURE)+
xlab("PARENTAL MIDPOINT STANDARD LENGTH (mm)") +
ylab("OFFSPRING STANDARD LENGTH (mm)") +
ggtitle("Offspring-male relationship") +
  theme_classic()
## `geom_smooth()` using formula = 'y ~ x'
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.012 0.116 0.032 0.052 0.348 0.236 0.292 0.436 0.14 0.324 0.388 0.656 0.688 0.496 0.144 0.404 0.616 0.252 0.268 0.496 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.032673, p-value = 0.1503
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.998, p-value = 0.984
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 11, observations = 1212, p-value = 0.6256
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.004539118 0.016181158
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.009075908
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2_df
##
## AIC BIC logLik deviance df.resid
## -2204.5 -2163.7 1110.3 -2220.5 1204
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.001413 0.03759
## Residual 0.008810 0.09386
## Number of obs: 1212, groups: CLUTCH_NUMBER, 45
##
## Dispersion estimate for gaussian family (sigma^2): 0.00881
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.3011127 0.0459019 6.560 5.38e-11 ***
## MASS_MIDPOINT -0.0007978 0.0009697 -0.823 0.411
## TEMPERATURE28.5 -0.0394936 0.0717508 -0.550 0.582
## TEMPERATURE30 0.0339443 0.0690491 0.492 0.623
## MASS_MIDPOINT:TEMPERATURE28.5 0.0013269 0.0016817 0.789 0.430
## MASS_MIDPOINT:TEMPERATURE30 -0.0006032 0.0015769 -0.383 0.702
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.211146607 0.391078838 0.3011127226
## MASS_MIDPOINT -0.002698241 0.001102740 -0.0007977506
## TEMPERATURE28.5 -0.180122472 0.101135301 -0.0394935859
## TEMPERATURE30 -0.101389441 0.169277952 0.0339442559
## MASS_MIDPOINT:TEMPERATURE28.5 -0.001969309 0.004623021 0.0013268560
## MASS_MIDPOINT:TEMPERATURE30 -0.003693879 0.002487493 -0.0006031930
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.029040752 0.048651868 0.0375883869
## # R2 for Mixed Models
##
## Conditional R2: 0.150
## Marginal R2: 0.013
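The calls that produced the diagnostics and summaries above are not shown in this extract. A minimal sketch of how output like this is typically generated, assuming the fitted object is named model1a (the name used in the chunks below):

simulated.resids <- simulateResiduals(model1a, n = 250)  # DHARMa residual simulation
testResiduals(simulated.resids)  # uniformity, dispersion and outlier tests shown above
summary(model1a)  # fixed and random effect estimates
confint(model1a)  # Wald 95% confidence intervals
r2(model1a)  # marginal and conditional R2 via performance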
m2_df <- m2_df |>
  drop_na(MASS_MIDPOINT)
m2.mass <- emmeans(model1a, ~ MASS_MIDPOINT * TEMPERATURE,
                   at = list(MASS_MIDPOINT = seq(from = min(m2_df$MASS_MIDPOINT),
                                                 to = max(m2_df$MASS_MIDPOINT),
                                                 by = 0.25)))
m2.mass.df <- as.data.frame(m2.mass)
m2.mass.obs <- drop_na(m2_df, MASS_MIDPOINT, MASS.x) |>
  mutate(Pred = predict(model1a, re.form = NA, type = "response"),
         Resid = residuals(model1a, type = "response"),
         Fit = Pred + Resid)
m2.mass.obs.summarize <- m2.mass.obs |>
  group_by(CLUTCH_NUMBER, TEMPERATURE) |>
  summarise(mean.mass = mean(Fit, na.rm = TRUE),
            mean.mass.male = mean(MASS_MIDPOINT, na.rm = TRUE),
            sd.mass = sd(Fit, na.rm = TRUE),
            n.mass = n()) |>
  mutate(se.mass = sd.mass / sqrt(n.mass),
         lower.ci.mass = mean.mass - qt(1 - (0.05 / 2), n.mass - 1) * se.mass,
         upper.ci.mass = mean.mass + qt(1 - (0.05 / 2), n.mass - 1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
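The Pred + Resid construction above rebuilds partial observations on the response scale around the population-level fit. An alternative sketch, not used here, via the ggeffects package that is already loaded for partial-effect plots:

m2.pred <- ggpredict(model1a, terms = c("MASS_MIDPOINT", "TEMPERATURE"))  # marginal predictions
plot(m2.pred)  # quick partial-effect plot to cross-check the emmeans grid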
ggplot(data = m2.mass.df, aes(x = MASS_MIDPOINT, y = emmean)) +
  stat_smooth(aes(color = TEMPERATURE),
              method = "lm",
              formula = y ~ x) +
  geom_pointrange(data = m2.mass.obs.summarize, aes(x = mean.mass.male,
                                                    y = mean.mass,
                                                    ymin = lower.ci.mass,
                                                    ymax = upper.ci.mass,
                                                    color = TEMPERATURE)) +
  scale_color_manual(values = c("#69d7d8", "#ff9c56", "#903146")) +
  scale_fill_manual(values = c("#69d7d8", "#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE) +
  xlab("PARENTAL MIDPOINT MASS") +
  ylab("OFFSPRING MASS") +
  ggtitle("Offspring-parent midpoint relationship") +
  theme_classic()
## Warning in AIC.default(modelNULL, model1a, model1b, k = 12): models are not all
## fitted to the same number of observations
## Warning in BIC.default(modelNULL, model1a, model1b): models are not all fitted
## to the same number of observations
Once again the NULL model seems to outperform our hypothesis-testing models. Note, however, the warnings above: the candidate models were not fitted to the same number of observations (likely because rows with missing parental mass are dropped from the non-null models), so this AIC/BIC comparison should be read with caution, or the models refitted on a common dataset. Let's follow the steps we conducted for the 2-month data and apply a log transformation to our dataset to see if it improves the model.
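The refitting code is not shown in this extract. A hedged sketch, assuming the data frame in play is m2.5_df and using hypothetical names (null.fit, full.fit, log.fit stand in for the document's modelNULL, model1a and model1b); refitting on a common complete-case dataset makes the information criteria comparable:

m2.5_complete <- drop_na(m2.5_df, MASS.x, MASS_MIDPOINT)  # common complete cases
null.fit <- glmmTMB(MASS.x ~ 1 + (1 | CLUTCH_NUMBER),
                    data = m2.5_complete)
full.fit <- glmmTMB(MASS.x ~ MASS_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER),
                    data = m2.5_complete)
AIC(null.fit, full.fit)  # now fitted to the same rows
log.fit <- glmmTMB(log(MASS.x) ~ MASS_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER),
                   data = m2.5_complete)  # log-transformed response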
## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help.
##
## Scaled residual values: 0.944 0.892 0.728 0.484 0.616 0.812 0.824 0.284 0.928 0.336 0.976 0.176 0.656 0.808 0.896 0.716 0.912 0.996 0.848 0.18 ...
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.032622, p-value = 0.1276
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.95693, p-value = 0.48
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa outlier test based on exact binomial test with approximate
## expectations
##
## data: simulationOutput
## outliers at both margin(s) = 7, observations = 1293, p-value = 0.4304
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
## 0.002179302 0.011122433
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 )
## 0.005413766
## `check_outliers()` does not yet support models of class `glmmTMB`.
## NOTE: Results may be misleading due to involvement in interactions
## Family: gaussian ( identity )
## Formula: MASS.x ~ MASS_MIDPOINT * TEMPERATURE + (1 | CLUTCH_NUMBER)
## Data: m2.5_df
##
## AIC BIC logLik deviance df.resid
## -1147.3 -1106.0 581.7 -1163.3 1285
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## CLUTCH_NUMBER (Intercept) 0.005385 0.07338
## Residual 0.022063 0.14854
## Number of obs: 1293, groups: CLUTCH_NUMBER, 50
##
## Dispersion estimate for gaussian family (sigma^2): 0.0221
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 0.409316 0.079890 5.123 3e-07 ***
## MASS_MIDPOINT 0.001344 0.001711 0.786 0.432
## TEMPERATURE28.5 0.053685 0.131473 0.408 0.683
## TEMPERATURE30 0.072453 0.115646 0.627 0.531
## MASS_MIDPOINT:TEMPERATURE28.5 -0.001039 0.003116 -0.334 0.739
## MASS_MIDPOINT:TEMPERATURE30 -0.002254 0.002692 -0.837 0.402
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 2.5 % 97.5 % Estimate
## (Intercept) 0.252733488 0.565897613 0.409315550
## MASS_MIDPOINT -0.002008261 0.004696797 0.001344268
## TEMPERATURE28.5 -0.203996428 0.311366662 0.053685117
## TEMPERATURE30 -0.154208910 0.299114069 0.072452579
## MASS_MIDPOINT:TEMPERATURE28.5 -0.007146013 0.005067192 -0.001039411
## MASS_MIDPOINT:TEMPERATURE30 -0.007531169 0.003022221 -0.002254474
## Std.Dev.(Intercept)|CLUTCH_NUMBER 0.057382814 0.093841327 0.073381738
## # R2 for Mixed Models
##
## Conditional R2: 0.204
## Marginal R2: 0.010
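For reference, the R2 values above presumably come from performance::r2() and follow the Nakagawa & Schielzeth decomposition: marginal R2 uses only the fixed-effects variance, while conditional R2 adds the random-intercept variance. A hand check for this Gaussian model, plugging in the variance components from the summary above (illustrative, not the authors' code):

var_fixed <- var(predict(model1a, re.form = NA))  # variance of the fixed-effect predictions
var_clutch <- 0.005385  # CLUTCH_NUMBER intercept variance from the summary above
var_resid <- 0.022063  # residual variance from the summary above
var_fixed / (var_fixed + var_clutch + var_resid)  # marginal R2, ~0.010
(var_fixed + var_clutch) / (var_fixed + var_clutch + var_resid)  # conditional R2, ~0.204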
m2.5_df <- m2.5_df |>
  drop_na(MASS_MIDPOINT)
m2.5.mass <- emmeans(model1a, ~ MASS_MIDPOINT * TEMPERATURE,
                     at = list(MASS_MIDPOINT = seq(from = min(m2.5_df$MASS_MIDPOINT),
                                                   to = max(m2.5_df$MASS_MIDPOINT),
                                                   by = 0.25)))
m2.5.mass.df <- as.data.frame(m2.5.mass)
m2.5.mass.obs <- drop_na(m2.5_df, MASS_MIDPOINT, MASS.x) |>
  mutate(Pred = predict(model1a, re.form = NA, type = "response"),
         Resid = residuals(model1a, type = "response"),
         Fit = Pred + Resid)
m2.5.mass.obs.summarize <- m2.5.mass.obs |>
  group_by(CLUTCH_NUMBER, TEMPERATURE) |>
  summarise(mean.mass = mean(Fit, na.rm = TRUE),
            mean.mass.male = mean(MASS_MIDPOINT, na.rm = TRUE),
            sd.mass = sd(Fit, na.rm = TRUE),
            n.mass = n()) |>
  mutate(se.mass = sd.mass / sqrt(n.mass),
         lower.ci.mass = mean.mass - qt(1 - (0.05 / 2), n.mass - 1) * se.mass,
         upper.ci.mass = mean.mass + qt(1 - (0.05 / 2), n.mass - 1) * se.mass) |>
  ungroup()
## `summarise()` has grouped output by 'CLUTCH_NUMBER'. You can override using the
## `.groups` argument.
ggplot(data = m2.5.mass.df, aes(x = MASS_MIDPOINT, y = emmean)) +
  stat_smooth(aes(color = TEMPERATURE),
              method = "lm",
              formula = y ~ x) +
  geom_pointrange(data = m2.5.mass.obs.summarize, aes(x = mean.mass.male,
                                                      y = mean.mass,
                                                      ymin = lower.ci.mass,
                                                      ymax = upper.ci.mass,
                                                      color = TEMPERATURE)) +
  scale_color_manual(values = c("#69d7d8", "#ff9c56", "#903146")) +
  scale_fill_manual(values = c("#69d7d8", "#ff9c56", "#903146")) +
  facet_wrap(~TEMPERATURE) +
  xlab("PARENTAL MIDPOINT MASS") +
  ylab("OFFSPRING MASS") +
  ggtitle("Offspring-parent midpoint relationship") +
  theme_classic()
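The 2-month and 2.5-month pipelines above differ only in the input data frame, so a small helper could remove the duplication. A hypothetical sketch (summarise_clutch_fits is not the authors' code; mean.mass.male is renamed mean.mass.midpoint here because it summarises the parental midpoint, and .groups = "drop" replaces the ungroup() call):

summarise_clutch_fits <- function(model, df) {
  # df must be the data the model was fitted to, so predict() rows line up
  drop_na(df, MASS_MIDPOINT, MASS.x) |>
    mutate(Pred = predict(model, re.form = NA, type = "response"),
           Resid = residuals(model, type = "response"),
           Fit = Pred + Resid) |>
    group_by(CLUTCH_NUMBER, TEMPERATURE) |>
    summarise(mean.mass = mean(Fit, na.rm = TRUE),
              mean.mass.midpoint = mean(MASS_MIDPOINT, na.rm = TRUE),
              sd.mass = sd(Fit, na.rm = TRUE),
              n.mass = n(),
              .groups = "drop") |>
    mutate(se.mass = sd.mass / sqrt(n.mass),
           lower.ci.mass = mean.mass - qt(0.975, n.mass - 1) * se.mass,
           upper.ci.mass = mean.mass + qt(0.975, n.mass - 1) * se.mass)
}
# e.g. summarise_clutch_fits(model1a, m2.5_df) should reproduce m2.5.mass.obs.summarize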